static void vmx_domain_destroy(struct domain *d)
{
- if ( d->arch.hvm_domain.hap_enabled )
+ if ( paging_mode_hap(d) )
on_each_cpu(__ept_sync_domain, d, 1);
vmx_free_vlapic_mapping(d);
}
if ( old_cr4 != new_cr4 )
write_cr4(new_cr4);
- if ( d->arch.hvm_domain.hap_enabled )
+ if ( paging_mode_hap(d) )
{
unsigned int cpu = smp_processor_id();
/* Test-and-test-and-set this CPU in the EPT-is-synced mask. */
void ept_sync_domain(struct domain *d)
{
/* Only if using EPT and this domain has some VCPUs to dirty. */
- if ( !d->arch.hvm_domain.hap_enabled || !d->vcpu || !d->vcpu[0] )
+ if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
return;
ASSERT(local_irq_is_enabled());
{
unsigned int old_pages;
int rv = 0;
+ uint32_t oldmode;
domain_pause(d);
+
+ oldmode = d->arch.paging.mode;
+ d->arch.paging.mode = mode | PG_HAP_enable;
+
/* error check */
if ( (d == current->domain) )
{
goto out;
}
- d->arch.paging.mode = mode | PG_HAP_enable;
-
out:
+ if ( rv )
+ d->arch.paging.mode = oldmode;
domain_unpause(d);
return rv;
}
#define hap_enabled(d) \
- (is_hvm_domain(d) && (d)->arch.hvm_domain.hap_enabled)
+ (is_hvm_domain(d) && paging_mode_hap(d))
#define mem_sharing_enabled(d) \
(is_hvm_domain(d) && (d)->arch.hvm_domain.mem_sharing_enabled)
if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
L3_PAGETABLE_SHIFT - PAGE_SHIFT,
((CONFIG_PAGING_LEVELS == 3)
- ? (d->arch.hvm_domain.hap_enabled ? 4 : 8)
+ ? (paging_mode_hap(d) ? 4 : 8)
: L3_PAGETABLE_ENTRIES),
PGT_l2_page_table) )
goto out;
p2m->get_entry_current = p2m_gfn_to_mfn_current;
p2m->change_entry_type_global = p2m_change_type_global;
- if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled &&
+ if ( is_hvm_domain(d) && paging_mode_hap(d) &&
(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) )
ept_p2m_init(d);
while ( todo )
{
- if ( is_hvm_domain(d) && d->arch.hvm_domain.hap_enabled )
+ if ( is_hvm_domain(d) && paging_mode_hap(d) )
order = (((gfn | mfn_x(mfn) | todo) & (SUPERPAGE_PAGES - 1)) == 0) ?
9 : 0;
else